Pipeline
Show the code
# Visualize the targets pipeline dependency graph (pipeline targets only,
# omitting helper functions). TRUE spelled out: T is reassignable in R.
targets::tar_visnetwork(targets_only = TRUE)
Warning message:
<anonymous>: ... may be used in an incorrect context:
class_metrics(group_by(add_pred_class(preds, threshold = .x),
threshold = .x), truth = {
{
outcome
}
}, estimate = .pred_class, event_level = "second", ...)
Models
Show the code
# Load the fitted vetiver models from the pin board. Each pin holds a
# trained model for one BGG outcome; `<-` replaces `=` for assignment
# per the tidyverse style guide.
averageweight_fit <-
  vetiver_pin_read(
    model_board,
    "bgg_averageweight_"
  )

average_fit <-
  vetiver_pin_read(
    model_board,
    "bgg_average_"
  )

usersrated_fit <-
  vetiver_pin_read(
    model_board,
    "bgg_usersrated_"
  )

hurdle_fit <-
  vetiver_pin_read(
    model_board,
    "bgg_hurdle_"
  )
Assessment
Show the code
# Plot validation predictions against observed outcomes, weighting point
# transparency by each game's observed number of user ratings (joined in
# from the raw games data).
valid_predictions |>
  pivot_outcomes() |>
  left_join(
    games |>
      bggUtils:::unnest_outcomes() |>
      select(game_id, usersrated),
    by = join_by(game_id)
  ) |>
  plot_predictions(alpha = usersrated) +
  theme(legend.title = element_text())
Show the code
# Validation metrics table, restricted to the 25-minimum-ratings cut.
# `quarto.disable_processing = TRUE` (not `T`) keeps Quarto from
# restyling the gt table.
targets_tracking_details(
  metrics = valid_metrics,
  details = details
) |>
  select(model, minratings, outcome, any_of(c("rmse", "mae", "mape", "rsq", "ccc"))) |>
  filter(minratings == 25) |>
  select(minratings, everything()) |>
  gt::gt() |>
  gt::tab_options(quarto.disable_processing = TRUE) |>
  gtExtras::gt_theme_espn()
| minratings |
model |
outcome |
rmse |
mae |
mape |
rsq |
ccc |
| 25 |
lightgbm |
average |
0.692 |
0.509 |
7.582 |
0.282 |
0.463 |
| 25 |
lightgbm |
averageweight |
0.457 |
0.347 |
19.210 |
0.665 |
0.804 |
| 25 |
lightgbm+lightgbm |
bayesaverage |
0.293 |
0.170 |
2.823 |
0.432 |
0.646 |
| 25 |
lightgbm |
usersrated |
1565.092 |
461.642 |
184.381 |
0.229 |
0.476 |
Features
Show the code
# Feature-importance plots for each fitted model. `<-` replaces `=`
# for assignment; spaces added around the ggplot `+` operator.
average_plot <-
  average_fit |>
  extract_vetiver_features() |>
  plot_model_features() +
  labs(title = "Average Rating")

averageweight_plot <-
  averageweight_fit |>
  extract_vetiver_features() |>
  plot_model_features() +
  labs(title = "Average Weight")

usersrated_plot <-
  usersrated_fit |>
  extract_vetiver_features() |>
  plot_model_features() +
  labs(title = "Users Rated")
Predictions
Show the code
# Predict upcoming games in three stages: impute the average-weight
# (complexity) outcome, score the hurdle model at the chosen threshold,
# then predict the Bayes-average rating from the average and users-rated
# models. `<-` replaces `=` for assignment.
predictions <-
  upcoming_games |>
  impute_averageweight(
    model = averageweight_fit
  ) |>
  predict_hurdle(
    model = hurdle_fit,
    threshold = hurdle_threshold
  ) |>
  predict_bayesaverage(
    average_model = average_fit,
    usersrated_model = usersrated_fit
  )
Predictions for games expected to achieve at least 25 ratings.
Show the code
# Table of predictions for recent games expected to clear the hurdle.
# The three row conditions are combined in a single filter() call
# (conditions in one filter are joined with &, same result as chaining).
predictions |>
  filter(
    yearpublished >= 2024,
    .pred_hurdle_class == "yes",
    # exclude one known problem game (Bah Humbug, id 388225)
    game_id != 388225
  ) |>
  select(-starts_with(".pred_hurdle")) |>
  predictions_dt(games = games) |>
  add_colors()
Probabilities for games on whether they will achieve 25 ratings.
Show the code
# Interactive table of hurdle probabilities, with game thumbnails and
# hyperlinked names pointing at BGG. Fixes: all T/F abbreviations
# spelled out as TRUE/FALSE; a no-op paste0() around a literal CSS
# font size collapsed to the string itself.
predictions |>
  filter(yearpublished >= 2024) |>
  filter(.pred_hurdle_class == "yes") |>
  arrange(desc(.pred_hurdle_yes)) |>
  filter(!is.na(thumbnail)) |>
  mutate(
    name = make_hyperlink(
      make_bgg_link(game_id),
      mytext = paste(name, paste0("(", yearpublished, ")"))
    )
  ) |>
  mutate(
    Image = make_image_link(thumbnail),
    Game = name,
    Description = stringr::str_trunc(description, width = 150),
    `Pr(Hurdle)` = round(.pred_hurdle_yes, 3),
    Hurdle = hurdle,
    `Ratings` = usersrated,
    .keep = "none"
  ) |>
  DT::datatable(
    escape = FALSE,
    rownames = FALSE,
    extensions = c("Responsive"),
    class = list(stripe = FALSE),
    filter = list(position = "top"),
    options = list(
      pageLength = 15,
      # shrink the table font once the widget finishes initializing
      initComplete = htmlwidgets::JS(
        "function(settings, json) {",
        "$(this.api().table().container()).css({'font-size': '10pt'});",
        "}"
      ),
      scrollX = FALSE,
      columnDefs = list(
        list(
          className = "dt-center",
          visible = TRUE,
          targets = c("Image", "Pr(Hurdle)", "Hurdle", "Ratings")
        )
      )
    )
  )